----------------------------------------------------------------------------------------
1) 'Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning'
----------------------------------------------------------------------------------------
def Week1 C1 "A new Programming Paradigm":
- C1_W1_Lab_1_hello_world_nn (numbers) ".Dense(units=1, input_shape=[1])"
- C1W1_Assignment_0 (numbers) "optimizer='sgd', loss='mean_squared_error'"
def Week2 C1 "Introduction to Computer Vision":
- C1_W2_Lab_1_beyond_hello_world (fashion_mnist) ".Flatten(), activation=tf.nn.relu,
activation=tf.nn.softmax, optimizer = tf.optimizers.Adam()"
- C1_W2_Lab_2_callbacks (fashion_mnist) "loss = 'sparse_categorical_crossentropy' "
- C1W2_Assignment_Callbacks_handwriting0_9 "metrics=['accuracy'] "
def Week3 C1 "Enhancing Vision with Convolutional Neural Networks":
- C1_W3_Lab_1_improving_accuracy_using_convolutions (fashion_mnist)
"optimizer='adam', model.evaluate(test_images, test_labels) "
- C1_W3_Lab_2_exploring_convolutions_Max Pooling(scipy, photo)
"convolution (filter, weight), Max Pooling (max(pixels))"
- C1W3_Assignment (mnist.npz)_Convolutions
"activation='relu', .Conv2D(), .MaxPooling2D() "
def Week4 C1 "Using Real-world Images":
- C1_W4_Lab_1_image_generator_no_validation ('horse-or-human.zip')
"activation='sigmoid', loss='binary_crossentropy', optimizer=RMSprop(learning_rate=0.001)"
- C1_W4_Lab_2_image_generator_with_validation (horse-or-human+validation)
"ImageDataGenerator, train, valid, class_mode='binary', validation_datagen "
- C1_W4_Lab_3_compacted_images (horse-or-human+validation)
"target_size = (150, 150) "
- C1W4_Assignment (data_happy_sad.zip)
----------------------------------------------------------------------------------------
2) 'Convolutional Neural Networks in TensorFlow'
----------------------------------------------------------------------------------------
def Week1 C2 "Exploring a Larger Dataset":
- C2_W1_Lab_1_cats_vs_dogs_Kaggle_small_set
".Conv2D(), .MaxPooling2D() "
- C2W1_Assignment_cats-and-dogs_big_set
def Week2 C2 "Augmentation: A Technique to Avoid Overfitting":
- C2_W2_Lab_1_cats_v_dogs_filtered_augmentation
- C2_W2_Lab_2_horses_v_humans_validation_augmentation
- C2W2_Assignment_augmentation_CatsDogsFULL_(cats-and-dogs.zip)
def Week3 C2 "Transfer Learning":
- C2_W3_Lab_1_transfer_learning_drop_(cats_and_dogs_filtered)
"InceptionV3(include_top = False), model.load_weights(), layer.trainable = False "
- C2W3_Assignment_augmentation_transfer_drop(horse-or-human.zip)
"x = layers.Flatten()(last_output), x = layers.Dropout(0.2)(x) "
def Week4 C2 "Multiclass Classifications":
- C2_W4_Lab_1_multi_class_classifier(rock, paper, scissors)rps.zip
"loss = 'categorical_crossentropy', activation='softmax', class_mode='categorical' "
- C2W4_Assignment_Multi-class_Classification(english alphabet)
"csv.reader(), np.expand_dims(training_images, -1), zip(*data), np.array(labels, dtype=np.float64), train_datagen.flow() "
----------------------------------------------------------------------------------------
3) 'Natural Language Processing in TensorFlow'
----------------------------------------------------------------------------------------
def Week1 C3 "Sentiment in text":
- C3_W1_Lab_1_tokenize_basic | ".fit_on_texts(sentences), .word_index"
- C3_W1_Lab_2_sequences_basic | "oov_token='<OOV>', .texts_to_sequences(sentences),
pad_sequences(sequences, maxlen=5), truncating='post', padding='post', truncating='pre' "
- C3_W1_Lab_3_sarcasm_Kaggle | "with open('./sarcasm.json', 'r') as f:, datastore = json.load(f)"
- C3W1_Assignment_(BBC News) | "csv.reader(csvfile, delimiter=','), remove_stopwords('I am about'), tokenize_labels()"
def Week2 C3 "Word Embeddings":
- C3_W2_Lab_1_imdb | ".Embedding(vocab_size, embedding_dim, input_length=max_length), embedding_layer.get_weights()[0], files.download('vecs.tsv'), files.download('meta.tsv')"
- C3_W2_Lab_2_sarcasm_classifier | "json.load(f), .GlobalAveragePooling1D(), files.download('vecs.tsv'), files.download('meta.tsv')"
- C3_W2_Lab_3_imdb_subwords | "tokenizer_plaintext, tokenizer_subwords, 6307 ----> Ten, 2327 ----> sor, train_data, test_data = imdb_subwords['train'], imdb_subwords['test']"
- C3W2_Assignment (Diving deeper into the BBC News archive) | "remove_stopwords(), parse_data_from_file(csv.reader()), .set_seed(123), .GlobalAveragePooling1D()"
def Week3 C3 "Sequence models":
- C3_W3_Lab_1_single_layer_LSTM(subword) | ".Embedding(), .Bidirectional(LSTM(32))" (see the sketch after this week's list)
- C3_W3_Lab_2_multiple_layer_LSTM(subword) | ".Bidirectional(LSTM(32), return_sequences=True), .Bidirectional(LSTM(32))"
- C3_W3_Lab_3_Conv1D | ".Embedding(), .Conv1D(filters, kernel_size, activation='relu'), .GlobalMaxPooling1D() "
- C3_W3_Lab_4_imdb_reviews_with_GRU_LSTM_Conv1D | ".Flatten(), .Bidirectional(.LSTM(32)), .Bidirectional(.GRU(32)), .Conv1D(filters, kernel_size, activation='relu'), .GlobalMaxPooling1D()"
- C3_W3_Lab_5_sarcasm_with_bi_LSTM | ".Bidirectional(tf.keras.layers.LSTM(lstm_dim))"
- C3_W3_Lab_6_sarcasm_with_1D_conv | ".Conv1D(128, 5, activation='relu'), .GlobalMaxPooling1D()"
- C3W3_Assignment_Exploring Overfitting in NLP(Sentiment140 dataset) | "only 10% dataset, linregress(), weights=[embeddings_matrix], Best practice"
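A minimal sketch of the stacked bidirectional-LSTM pattern these Week 3 labs share (layer sizes are illustrative; vocab_size, embedding_dim, and max_length are assumed set as in the labs):
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(24, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid') ])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])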
def Week4 C3 "Sequence models and literature":
- C3_W4_Lab_1_Generating Text_NN | "Dense(total_words), .to_categorical(labels), model.add(Bidirectional(LSTM(20))), seed_text='Laurence went to Dublin'" (n-gram sketch after this week's list)
- C3_W4_Lab_2_irish_lyrics_1,692_sentences | "new_word=tokenizer.index_word[predicted], predicted=np.argsort(probabilities)[0][-choice]"
- C3_W4_Lab_3_generating_Shakespeare_TensorFlow_RNN | "uses character-based prediction, class MyModel(), class OneStep(), ids_from_chars, chars_from_ids, BIG INFORMATION"
- C3W4_Assignment_Shakespeare classic RNN | ".add(Embedding(), .add(Bidirectional(LSTM(128)))"
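The Week 4 labs all build training pairs the same way: every line becomes a set of n-gram prefixes, with the last token as the label. A minimal sketch, assuming tokenizer, corpus, and total_words are built as in the labs and numpy/pad_sequences are imported:
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        # every prefix of the line becomes one training example
        input_sequences.append(token_list[:i+1])
max_sequence_len = max(len(x) for x in input_sequences)
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = input_sequences[:, :-1], input_sequences[:, -1]
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)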
----------------------------------------------------------------------------------------
4) 'Sequences, Time Series and Prediction'
----------------------------------------------------------------------------------------
def Week1 C4 "Sequences and Prediction":
- C4_W1_Lab_1_time_series | "trend, seasonality_pattern, seasonality, noise, autocorrelation, impulses, autocorrelation_impulses, Non-stationary Time Series"
- C4_W1_Lab_2_forecasting | "Naive Forecast mse=61.8, mae=5.9; Moving Average mse=106.6, mae=7.1; Differencing mse=53.7, mae=5.9; Smoothing mse=34.3, mae=4.6" (sketch below)
- C4W1_Assignment_time_series | "mse:19.58, mae:2.60 for naive, mse:65.79, mae:4.30 for moving average, mse:8.50, mae:2.33 for moving average plus past, mse:12.53, mae:2.20 for moving average plus smooth past"
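A minimal sketch of the Week 1 statistical baselines (naive and moving average), assuming series, split_time, and x_valid are defined as in the labs:
def moving_average_forecast(series, window_size):
    # mean of the last window_size values for each forecast point
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)
naive_forecast = series[split_time - 1:-1]   # repeat the previous value
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
print(tf.keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())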
def Week2 C4 "Deep Neural Networks for Time Series":
- C4_W2_Lab_1_Preparing_Time_Series_features_and_labels | "tf.data.Dataset.range(10); dataset.window(size=5, shift=1, drop_remainder=True), .flat_map(lambda window: window.batch(5)), .map(lambda window: (window[:-1], window[-1])), .shuffle(buffer_size=10), .batch(2).prefetch(1)" (sketch after this week's list)
- C4_W2_Lab_2_single_layer_NN(Time series) | "dataset.batch(batch_size).prefetch(1); tf.keras.layers.Dense(1, input_shape=[window_size]); tf.keras.models.Sequential([l0]); model.fit(dataset, epochs=100); mse: 46.7, mae: 5.1"
- C4_W2_Lab_3_deep_NN(Time series) | "tf.keras.layers.Dense(10, ...); tf.keras.layers.Dense(10, ...); tf.keras.layers.Dense(1), mse: 45.7, mae: 5.01; tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9); mse: 42.2, mae: 4.8"
- C4W2_Assignment_DNN(Time series): | "plot_series(), generate_time_series(), @dataclass, train_val_split(), windowed_dataset(), create_model(), compute_metrics(), generate_forecast(), mse:29.33, mae:3.33"
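A consolidated sketch of the windowing pipeline the Week 2 labs build step by step (series is a 1-D array; each window holds window_size inputs plus one label):
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.shuffle(shuffle_buffer)
    return dataset.batch(batch_size).prefetch(1)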
def Week3 C4 "Recurrent Neural Networks for Time Series":
- C4_W3_Lab_1_RNN_for_time_series | "tf.keras.layers.Lambda(), .SimpleRNN(), .SimpleRNN(), .Dense(1), .Lambda(), learning_rate, mse:65.11, mae:6.25"
- C4_W3_Lab_2_LSTM_for_time_series | ".clear_session(), .Lambda(), .Bidirectional(tf.keras.layers.LSTM(), .Bidirectional(.LSTM(), .Dense(1), .Lambda(), .compile(loss=tf.keras.losses.Huber(), learning_rate, mse:58.02, mae:5.48"
- C4W3_Assignment RNNs(time series) | ".Lambda(), .Bidirectional(), .LSTM(32), create_uncompiled_model(), adjust_learning_rate(), create_model(), compute_metrics(), model_forecast(); goal: achieve an MAE of 4.5 or less, mse:28.74, mae:3.21, !tar -czvf saved_model.tar.gz saved_model/" (sketch below)
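A minimal sketch of the Week 3 pattern: Lambda layers wrap the bidirectional LSTMs to add a channel dimension on the way in and rescale on the way out (the 100.0 factor and learning rate are illustrative):
model = tf.keras.models.Sequential([
    tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), input_shape=[window_size]),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(1),
    tf.keras.layers.Lambda(lambda x: x * 100.0) ])
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9),
              metrics=['mae'])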
def Week1 C4 "Real-world time series data":
- C4_W4_Lab_1_Convolutions_with_LSTM | "init_weights = model.get_weights(), tf.keras.backend.clear_session(), model.set_weights(init_weights), .Conv1D(), .LSTM(64), .LSTM(64), .Lambda(); mse:46.32,mae:5.23(58,02; 5,8) "
- C4_W4_Lab_2_Sunspots_DNN_Kaggle | ".LearningRateScheduler(), .clear_session(), .Dense(30), .Dense(10), .Dense(1); model_forecast(): for predict; mae: 14.93"
- C4_W4_Lab_3_Sunspots_CNN_RNN_DNN | "windowed_dataset(): for tuples (features, labels); .Conv1D(), .LSTM(64), .LSTM(64), .Dense(30), .Dense(10), .Dense(1), .Lambda(), epochs=500, mae: 14.43; epochs=100, mae: 14.02; epochs=40, .ExponentialDecay(), mae: 14.26, other lr_schedule; epochs=10, mae: 14.92"
- C4W4_Assignment_min_temp_Melbourne | ".Conv1D(), .LSTM(64), .LSTM(64), .Dense(30), .Dense(10), .Dense(5), .Dense(1): MSE <= 6, MAE <= 2: achieved 5.41, 1.82"
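A sketch of the Conv1D + stacked-LSTM architecture the Week 4 labs converge on, assuming windowed inputs already carry a trailing channel dimension ([window_size, 1]); the output scale factor is tuned per dataset:
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv1D(filters=64, kernel_size=3, strides=1,
                           padding='causal', activation='relu',
                           input_shape=[window_size, 1]),
    tf.keras.layers.LSTM(64, return_sequences=True),
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dense(30, activation='relu'),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(1),
    tf.keras.layers.Lambda(lambda x: x * 400) ])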
C1W1L1_MachineLearning_DeepLearning.ipynb
---------------------------------------------------------------
model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=500)
prediction = model.predict([10.0])
C1W1LwAssignment_ML_DL_HousingPrices.ipynb
---------------------------------------------------------------
model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=1000)
prediction = model.predict([new_x])[0]
C1W2L1_ComputerVisionMNIST_clothing_images.ipynb
---------------------------------------------------------------
training_images = training_images / 255.0
test_images = test_images / 255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
C1W2L2_MNIST_Callbacks.ipynb
---------------------------------------------------------------
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.optimizers.Adam(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') < 0.4:
            print("\nLoss is lower than 0.4 so cancelling training!")
            self.model.stop_training = True
callbacks = myCallback()
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
C1W2LwAssignment_datasetMNIST_handwriting0_9.ipynb
---------------------------------------------------------------
x_train = x_train / 255.0
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epochs, logs={}):
        if logs.get("accuracy") is not None and logs.get("accuracy") > 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True
callbacks = myCallback()
def train_mnist(x_train, y_train):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax) ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
    return history
hist = train_mnist(x_train, y_train)
C1W3L1_MNISTfashin_DL_Convolutions.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax) ])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
print(f'\nMODEL EVALUATION')
test_loss = model.evaluate(test_images, test_labels)
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') < 0.3:
            print("\nLoss is lower than 0.3 so cancelling training!")
            self.model.stop_training = True
callbacks = myCallback()
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax) ])
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])
print(f'\nMODEL EVALUATION')
test_loss = model.evaluate(test_images, test_labels)
C1W3L2_convolutions_filters_and_pools.ipynb
---------------------------------------------------------------
filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
weight = 1
for x in range(1, size_x-1):
    for y in range(1, size_y-1):
        convolution = 0.0
        convolution = convolution + (ascent_image[x-1, y-1] * filter[0][0])
        convolution = convolution + (ascent_image[x-1, y] * filter[0][1])
        convolution = convolution + (ascent_image[x-1, y+1] * filter[0][2])
        convolution = convolution + (ascent_image[x, y-1] * filter[1][0])
        convolution = convolution + (ascent_image[x, y] * filter[1][1])
        convolution = convolution + (ascent_image[x, y+1] * filter[1][2])
        convolution = convolution + (ascent_image[x+1, y-1] * filter[2][0])
        convolution = convolution + (ascent_image[x+1, y] * filter[2][1])
        convolution = convolution + (ascent_image[x+1, y+1] * filter[2][2])
        convolution = convolution * weight
        if convolution < 0:
            convolution = 0
        if convolution > 255:
            convolution = 255
        # Load into the transformed image
        image_transformed[x, y] = convolution
# Effect of Max Pooling
new_x = int(size_x/2)
new_y = int(size_y/2)
# Create blank image with reduced dimensions
newImage = np.zeros((new_x, new_y))
# Iterate over the image
for x in range(0, size_x, 2):
    for y in range(0, size_y, 2):
        # Store all the pixel values in the (2,2) pool
        pixels = []
        pixels.append(image_transformed[x, y])
        pixels.append(image_transformed[x+1, y])
        pixels.append(image_transformed[x, y+1])
        pixels.append(image_transformed[x+1, y+1])
        # Get only the largest value and assign to the reduced image
        newImage[int(x/2), int(y/2)] = max(pixels)
C1W3LwAssignment_Improve_MNIST_with_Convolutions.ipynb
---------------------------------------------------------------
def reshape_and_normalize(images):
    # Reshape the images to add an extra dimension
    images = images.reshape((60000, 28, 28, 1))
    # Normalize pixel values
    images = images / 255.0
    return images
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') > 0.995:
            print("\nReached 99.5% accuracy so cancelling training!")
            self.model.stop_training = True
# Convolutional Model
def convolutional_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(28,28,1)),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(352, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax') ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
from tensorflow.python.ops.check_ops import assert_less
model = convolutional_model()
model_params = model.count_params()
assert model_params < 1000000, (
    f'Your model has {model_params:,} params. For successful grading, please keep it '
    f'under 1,000,000 by reducing the number of units in your Conv2D and/or Dense layers.')
callbacks = myCallback()
history = model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])
C1W4L1_image_generator_no_validation.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300, 300, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid') ])
model.summary()
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
metrics=['accuracy'])
# Data Preprocessing
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255.0)
# Flow training images in batches of 128 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
'./horse-or-human/',
target_size=(300, 300),
batch_size=128,
class_mode='binary' )
history = model.fit(train_generator,
steps_per_epoch=8,
epochs=15,
verbose=2)
# Model Prediction (fn and classes come from the upload loop sketched below)
if classes[0] > 0.5:
    print(fn + '\nis a human')
else:
    print(fn + '\nis a horse')
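A minimal sketch of the Colab upload loop that produces fn and classes above (the full version appears in C2W4L1 below; rescaling by 1/255 matches the training generator):
import numpy as np
from google.colab import files
from tensorflow.keras.utils import load_img, img_to_array
uploaded = files.upload()
for fn in uploaded.keys():
    img = load_img(fn, target_size=(300, 300))   # match the model's input size
    x = img_to_array(img) / 255.0
    x = np.expand_dims(x, axis=0)
    classes = model.predict(np.vstack([x]), batch_size=10)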
# Visualizing Intermediate Representations
C1W4L2_image_generator_with_validation.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300, 300, 3)),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid') ])
model.summary()
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
metrics=['accuracy'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1/255.0)
validation_datagen = ImageDataGenerator(rescale = 1/255.0)
train_generator = train_datagen.flow_from_directory(
'./horse-or-human/',
target_size=(300, 300),
batch_size=128,
class_mode='binary')
# Flow validation images in batches of 32 using validation_datagen generator
validation_generator = validation_datagen.flow_from_directory(
'./validation-horse-or-human/',
target_size=(300, 300),
batch_size=32,
class_mode='binary' )
history = model.fit(
train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
validation_data = validation_generator,
validation_steps=8)
# Model Prediction
if classes[0] > 0.5:
    print(fn + "\n is a human")
else:
    print(fn + "\n is a horse")
# Visualizing Intermediate Representations
C1W4L3_compacted_images.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape = (150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.summary()
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
metrics=['accuracy'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches of 128 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
'./horse-or-human/',
target_size = (150, 150),
batch_size = 128,
class_mode = 'binary')
# Flow validation images in batches of 32 using validation_datagen generator
validation_generator = validation_datagen.flow_from_directory(
'./validation-horse-or-human/',
target_size = (150, 150),
batch_size = 32,
class_mode = 'binary')
history = model.fit(
train_generator,
steps_per_epoch = 8,
epochs = 15,
verbose = 1,
validation_data = validation_generator,
validation_steps = 8)
# Model Prediction
if classes[0] > 0.5:
    print(fn + "\n is a human")
else:
    print(fn + "\n is a horse")
C1W4LwAssignment_Happy_Sad_Dataset.ipynb
---------------------------------------------------------------
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') is not None and logs.get('accuracy') > 0.999:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def image_generator():
    train_datagen = ImageDataGenerator(rescale=1/255.0)
    train_generator = train_datagen.flow_from_directory(directory=base_dir,
                                                        target_size=(150, 150),
                                                        batch_size=10,
                                                        class_mode='binary')
    return train_generator
gen = image_generator()
from tensorflow.keras import losses, optimizers
def train_happy_sad_model(train_generator):
    callbacks = myCallback()
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid') ])
    model.compile(loss=losses.BinaryCrossentropy(),
                  optimizer=optimizers.RMSprop(),
                  metrics=['accuracy'])
    history = model.fit(x=train_generator,
                        epochs=20,
                        verbose=1,
                        callbacks=[callbacks])
    return history
hist = train_happy_sad_model(gen)
C2W1L1_CNNs_CatsAndDog.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid') ])
model.summary()
from tensorflow.keras.optimizers import RMSprop
model.compile(optimizer=RMSprop(learning_rate=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0/255.0)
test_datagen = ImageDataGenerator(rescale=1.0/255.0)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=20,
class_mode='binary',
target_size=(150, 150))
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size=20,
class_mode='binary',
target_size = (150, 150))
history = model.fit(
train_generator,
epochs=15,
validation_data=validation_generator,
verbose=2 )
# Model Prediction
if classes[0] > 0.5:
    print(fn + " is a dog")
else:
    print(fn + " is a cat")
# Visualizing Intermediate Representations
# Plot training and validation accuracy per epoch
# Plot training and validation loss per epoch
C2W1_Assignment_cats-and-dogs_big_set.ipynb
---------------------------------------------------------------
def train_val_generators(TRAINING_DIR, VALIDATION_DIR):
    train_datagen = ImageDataGenerator(rescale = 1.0 / 255.0)
    train_generator = train_datagen.flow_from_directory(
        directory = TRAINING_DIR,
        batch_size = 150,
        class_mode = 'binary',
        target_size = (150, 150) )
    validation_datagen = ImageDataGenerator(rescale = 1.0 / 255.0)
    validation_generator = validation_datagen.flow_from_directory(
        directory = VALIDATION_DIR,
        batch_size = 150,
        class_mode = 'binary',
        target_size = (150, 150) )
    return train_generator, validation_generator
train_generator, validation_generator = train_val_generators(TRAINING_DIR, VALIDATION_DIR)
def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid') ])
    from tensorflow.keras.optimizers import RMSprop
    model.compile(optimizer = RMSprop(learning_rate = 0.001),
                  loss = 'binary_crossentropy',
                  metrics = ['accuracy'])
    return model
model = create_model()
history = model.fit(train_generator,
epochs = 15,
verbose = 2,
validation_data=validation_generator)
# Plot training and validation accuracy per epoch
# Plot training and validation loss per epoch
C2W2L1_Augmentation_CatsDogs.ipynb
---------------------------------------------------------------
from tensorflow.keras.optimizers import RMSprop
def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid') ])
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(learning_rate=1e-4),
                  metrics=['accuracy'])
    return model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary' )
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary' )
EPOCHS = 100
model = create_model()
history = model.fit(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=EPOCHS,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=1 )
import matplotlib.pyplot as plt
def plot_loss_acc(history):
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    plt.plot(epochs, acc, 'bo', label='Training accuracy')
    plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
    plt.title('Training and validation accuracy')
    plt.grid(True)
    plt.legend()
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training Loss')
    plt.plot(epochs, val_loss, 'b', label='Validation Loss')
    plt.title('Training and validation loss')
    plt.grid(True)
    plt.legend()
# Create new model
model_for_aug = create_model()
# This code has changed. Now instead of the ImageGenerator just rescaling
# the image, we also rotate and do other operations
train_datagen = ImageDataGenerator(
rescale = 1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
history_with_aug = model_for_aug.fit(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=EPOCHS,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=1)
plot_loss_acc(history_with_aug)
C2W2L2_Augmentation_horses_v_humans.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid') ])
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=1e-4),
metrics=['accuracy'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest' )
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'tmp/horse-or-human/',
target_size=(300, 300),
batch_size=128,
class_mode='binary' )
validation_generator = validation_datagen.flow_from_directory(
'tmp/validation-horse-or-human/',
target_size=(300, 300),
batch_size=32,
class_mode='binary' )
EPOCHS = 100
history = model.fit(
train_generator,
steps_per_epoch=8,
epochs=EPOCHS,
verbose=1,
validation_data = validation_generator,
validation_steps=8)
C2W2LwAssignment_TackleOverfitting_Augmentation_CatsDogsFULL.ipynb
---------------------------------------------------------------
def train_val_generators(TRAINING_DIR, VALIDATION_DIR):
    train_datagen = ImageDataGenerator(rescale=1./255.,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
    train_generator = train_datagen.flow_from_directory(directory=TRAINING_DIR,
                                                        batch_size=29,
                                                        class_mode='binary',
                                                        target_size=(150, 150))
    validation_datagen = ImageDataGenerator(rescale=1./255.)
    validation_generator = validation_datagen.flow_from_directory(directory=VALIDATION_DIR,
                                                                  batch_size=20,
                                                                  class_mode='binary',
                                                                  target_size=(150, 150))
    return train_generator, validation_generator
def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid') ])
    from tensorflow.keras.optimizers import RMSprop
    model.compile(optimizer=RMSprop(learning_rate=0.001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
model = create_model()
history = model.fit(train_generator,
epochs=50,
verbose=1,
validation_data=validation_generator)
C2W3L1_transfer_learning_cats_and_dogs_filtered.ipynb
---------------------------------------------------------------
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers
local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
pre_trained_model = InceptionV3(input_shape = (150, 150, 3),
include_top = False,
weights = None)
pre_trained_model.load_weights(local_weights_file)
# Freeze the weights of the layers.
for layer in pre_trained_model.layers:
    layer.trainable = False
pre_trained_model.summary()
# Choose `mixed7` as the last layer of your base model
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape', last_layer.output_shape)
last_output = last_layer.output
# Add dense layers for your classifier
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras import Model
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation='sigmoid')(x)
# Append the dense network to the base model
model = Model(pre_trained_model.input, x)
model.summary()
model.compile(optimizer=RMSprop(learning_rate=0.0001),
              loss = 'binary_crossentropy',
              metrics = ['accuracy'])
train_datagen = ImageDataGenerator(rescale = 1./255.,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True )
test_datagen = ImageDataGenerator(rescale = 1./255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150) )
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150) )
history = model.fit(
train_generator,
validation_data = validation_generator,
steps_per_epoch = 100,
epochs = 20,
validation_steps = 50,
verbose = 1)
# Evaluate the results
C2W3LwAssignment_TransferLearning_Horses_Humans.ipynb
---------------------------------------------------------------
from tensorflow.keras.applications.inception_v3 import InceptionV3
local_weights_file = './tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def create_pre_trained_model(local_weights_file):
    pre_trained_model = InceptionV3(input_shape = (150, 150, 3),
                                    include_top = False,
                                    weights = None)
    pre_trained_model.load_weights(local_weights_file)
    for layer in pre_trained_model.layers:
        layer.trainable = False
    return pre_trained_model
pre_trained_model = create_pre_trained_model(local_weights_file)
pre_trained_model.summary()
def train_val_generators(TRAINING_DIR, VALIDATION_DIR):
    train_datagen = ImageDataGenerator(
        rescale = 1. / 255.,
        rotation_range = 40,
        width_shift_range = 0.2,
        height_shift_range = 0.2,
        shear_range = 0.2,
        zoom_range = 0.2,
        horizontal_flip = True )
    train_generator = train_datagen.flow_from_directory(directory=TRAINING_DIR,
                                                        batch_size=32,
                                                        class_mode='binary',
                                                        target_size=(150, 150) )
    validation_datagen = ImageDataGenerator(rescale = 1./255.)
    validation_generator = validation_datagen.flow_from_directory(directory=VALIDATION_DIR,
                                                                  batch_size=32,
                                                                  class_mode='binary',
                                                                  target_size=(150, 150) )
    return train_generator, validation_generator
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') > 0.999:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
def output_of_last_layer(pre_trained_model):
    last_desired_layer = pre_trained_model.get_layer('mixed7')
    print('last layer output shape: ', last_desired_layer.output_shape)
    last_output = last_desired_layer.output
    print('last layer output: ', last_output)
    return last_output
def create_final_model(pre_trained_model, last_output):
    x = layers.Flatten()(last_output)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs=pre_trained_model.input, outputs=x)
    model.compile(optimizer = RMSprop(learning_rate=0.0001),
                  loss = 'binary_crossentropy',
                  metrics = ['accuracy'])
    return model
model = create_final_model(pre_trained_model, last_output)
total_params = model.count_params()
num_trainable_params = sum([w.shape.num_elements() for w in model.trainable_weights])
print(f"There are {total_params:,} total parameters in this model.")
print(f"There are {num_trainable_params:,} trainable parameters in this model.")
callbacks = myCallback()
history = model.fit(train_generator,
                    validation_data = validation_generator,
                    epochs = 100,
                    verbose = 2,
                    callbacks = [callbacks])
# Plot the training and validation accuracies for each epoch
C2W4L1_MultiClassifier_Rock_Paper_Scissors.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(3, activation='softmax') ])
model.summary()
# Set the training parameters
model.compile(loss = 'categorical_crossentropy',
optimizer = 'rmsprop',
metrics = ['accuracy'] )
from tensorflow.keras.preprocessing.image import ImageDataGenerator
TRAINING_DIR = './tmp/rps-train/rps'
VALIDATION_DIR = './tmp/rps-test/rps-test-set'
training_datagen = ImageDataGenerator(
rescale = 1./255.,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest')
validation_datagen = ImageDataGenerator(rescale = 1./255.)
train_generator = training_datagen.flow_from_directory(
TRAINING_DIR,
target_size = (150, 150),
class_mode = 'categorical',
batch_size = 126 )
validation_generator = validation_datagen.flow_from_directory(
VALIDATION_DIR,
target_size = (150, 150),
class_mode = 'categorical',
batch_size = 126 )
history = model.fit(
train_generator,
epochs = 25,
steps_per_epoch = 20,
validation_data = validation_generator,
verbose = 1,
validation_steps = 3 )
# Plot the results
# Model Prediction [paper, rock, scissors]
import numpy as np
from google.colab import files
from tensorflow.keras.utils import load_img, img_to_array
uploaded = files.upload()
for fn in uploaded.keys():
    path = fn
    img = load_img(path, target_size=(150, 150))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(fn)
    print(classes)
C2W4LwAssignment_MultiClassification_26LettersOfTheEnglishAlphabetCSV.ipynb
---------------------------------------------------------------
import csv
import numpy as np
def parse_data_from_input(filename):
    with open(filename) as file:
        csv_reader = csv.reader(file, delimiter=',')
        next(csv_reader)   # skip the header row
        data = []
        for row in csv_reader:
            label = float(row[0])
            image_pixels = [float(x) for x in row[1:]]
            image = np.array(image_pixels).reshape(28, 28)
            data.append((label, image))
    labels, images = zip(*data)
    labels = np.array(labels, dtype=np.float64)
    images = np.array(images, dtype=np.float64)
    return images, labels
# Plot a sample of 10 images from the training set
def train_val_generators(training_images, training_labels, validation_images, validation_labels):
    num = 26
    training_images = np.expand_dims(training_images, -1)
    validation_images = np.expand_dims(validation_images, -1)
    train_datagen = ImageDataGenerator(rescale = 1./255.,
                                       zoom_range=0.2)
    train_generator = train_datagen.flow(x = training_images,
                                         # y = training_labels,
                                         y = tf.keras.utils.to_categorical(training_labels, num_classes=num),
                                         batch_size = 32)
    validation_datagen = ImageDataGenerator(rescale = 1./255.)
    validation_generator = validation_datagen.flow(x = validation_images,
                                                   # y = validation_labels,
                                                   y = tf.keras.utils.to_categorical(validation_labels, num_classes=num),
                                                   batch_size=32)
    return train_generator, validation_generator
def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(52, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(52, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(26, activation='softmax') ])
    model.compile(optimizer = 'rmsprop',
                  loss = 'categorical_crossentropy',
                  metrics = ['accuracy'])
    return model
model = create_model()
history = model.fit(train_generator,
epochs=15,
validation_data = validation_generator)
# Plot the chart for accuracy and loss on both training and validation
C3W1L1_Tokenizer_Basics.ipynb
---------------------------------------------------------------
from tensorflow.keras.preprocessing.text import Tokenizer
sentences = [
    'i love my dog',
    'I love my cat']
tokenizer = Tokenizer(num_words = 100)
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(word_index)
{'i': 1, 'love': 2, 'my': 3, 'dog': 4, 'cat': 5}
C3W1L2_Generating_Sequences_Padding.ipynb
---------------------------------------------------------------
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
sentences = [
'I love my dog',
'I, love my cat',
'You love my dog',
'Do you think my dog is amazing?']
tokenizer = Tokenizer(num_words = 100, oov_token = '<OOV>')
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, maxlen=5)
print("\nWord Index = ", word_index)
print("\nSequences = ", sequences)
Word Index = {'<OOV>': 1, 'my': 2, 'love': 3, 'dog': 4, 'i': 5, 'you': 6, 'cat': 7, 'do': 8, 'think': 9, 'is': 10, 'amazing': 11}
Sequences = [[5, 3, 2, 4], [5, 3, 2, 7], [6, 3, 2, 4], [8, 6, 9, 2, 4, 10, 11]]
print("\nPadded Sequences:")
print(padded)
Padded Sequences:
[[ 0 5 3 2 4]
[ 0 5 3 2 7]
[ 0 6 3 2 4]
[ 9 2 4 10 11]]
C3W1L3_sarcasm_detection.ipynb
---------------------------------------------------------------
import json
with open("./sarcasm.json", 'r') as f:
datastore = json.load(f)
sentences = []
labels = []
urls = []
for item in datastore:
    sentences.append(item['headline'])
    labels.append(item['is_sarcastic'])
    urls.append(item['article_link'])
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(f"number of words in word_index: {len(word_index)}")
print(f"word_index: {word_index}")
print()
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding='post')
index = 2
print(f"sample headline: {sentences[index]}")
print(f"padded sequence: {padded[index]}")
print()
print(f"shape of padded sequences: {padded.shape}")
number of words in word_index: 29657
word_index: {'<OOV>': 1, 'to': 2, 'of': 3, 'the': 4, 'in': 5, 'for': 6, ...
sample headline: mom starting to fear sons web series closest thing she will have to grandchild
padded sequence: [ 145 838 2 907 1749 2093 582 4719 221 143 39 46
2 10736 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0]
from google.colab import runtime
stop_runtime = input("Stop runtime('y' or 'n'): ")
if stop_runtime == 'y':
    runtime.unassign()
C3W1LwAssignment_BBC_News_archive.ipynb
---------------------------------------------------------------
def remove_stopwords(sentence):
    stopwords = ["a", "about", "above", "after", "again"...
    sentence = sentence.lower()
    words = sentence.split()
    filtered_words = [word for word in words if word not in stopwords]
    sentence = " ".join(filtered_words)
    return sentence
def parse_data_from_file(filename):
    sentences = []
    labels = []
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)
        for row in reader:
            label = row[0]
            sentence = remove_stopwords(row[1])
            sentences.append(sentence)
            labels.append(label)
    return sentences, labels
def fit_tokenizer(sentences):
    tokenizer = Tokenizer(oov_token='<OOV>')
    tokenizer.fit_on_texts(sentences)
    return tokenizer
def get_padded_sequences(tokenizer, sentences):
    sequences = tokenizer.texts_to_sequences(sentences)
    padded_sequences = pad_sequences(sequences, padding='post')
    return padded_sequences
def tokenize_labels(labels):
    label_tokenizer = Tokenizer()
    label_tokenizer.fit_on_texts(labels)
    label_word_index = label_tokenizer.word_index
    label_sequences = label_tokenizer.texts_to_sequences(labels)
    return label_sequences, label_word_index
label_sequences, label_word_index = tokenize_labels(labels)
print(f"Vocabulary of labels looks like this {label_word_index}\n")
print(f"First ten sequences {label_sequences[:10]}\n")
Vocabulary of labels looks like this {'sport': 1, 'business': 2, 'politics': 3, 'tech': 4, 'entertainment': 5}
First ten sequences [[4], [2], [1], [1], [5], [3], [3], [1], [1], [5]]
from google.colab import runtime
stop_runtime = input("Stop runtime('y' or 'n'): ")
if stop_runtime == 'y':
    runtime.unassign()
C3W2L1_IMDB_Reviews_Dataset.ipynb
---------------------------------------------------------------
train_data, test_data = imdb['train'], imdb['test']
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(s.numpy().decode('utf8'))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(s.numpy().decode('utf8'))
    testing_labels.append(l.numpy())
# Parameters
vocab_size = 10000
max_length = 150
embedding_dim = 16
trunc_type = 'post'
oov_tok = "<OOV>"
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, truncating=trunc_type)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='relu'),
tf.keras.layers.Dense(3, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
num_epochs = 10
model.fit(padded,
training_labels_final,
epochs=num_epochs,
validation_data=(testing_padded, testing_labels_final))
# Visualize Word Embeddings
files.download('vecs.tsv')
files.download('meta.tsv')
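The two .tsv files are written from the trained Embedding layer before being downloaded; a sketch along the lines of the lab (it assumes the Embedding layer is the first layer of the model):

import io
weights = model.layers[0].get_weights()[0]
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
    word_name = tokenizer.index_word[word_num]
    word_embedding = weights[word_num]
    out_m.write(word_name + "\n")
    out_v.write('\t'.join([str(x) for x in word_embedding]) + "\n")
out_v.close()
out_m.close()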
from google.colab import runtime
stop_runtime = input("Stop runtime('y' or 'n'): ")
if stop_runtime == 'y':
    runtime.unassign()
C3W2L2_Sarcasm_Dataset_binary_classifier.ipynb
---------------------------------------------------------------
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type)
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid') ])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
num_epochs = 30
history = model.fit(training_padded,
training_labels,
epochs=num_epochs,
validation_data=(testing_padded, testing_labels),
verbose=1)
# Visualize the Results
# Plot utility
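The "plot utility" referenced throughout these notes is the small helper the course defines once; a sketch of it (assumes matplotlib and a Keras History object with validation metrics):

import matplotlib.pyplot as plt

def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()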
# Visualize Word Embeddings
files.download('vecs_c3w2l2.tsv')
files.download('meta_c3w2l2.tsv')
C3W2L3_Subword_Tokenization_IMDB_Dataset.ipynb
---------------------------------------------------------------
train_data = imdb_plaintext['train']
training_sentences = []
for s, _ in train_data:
    training_sentences.append(s.numpy().decode('utf8'))
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
vocab_size = 10000
oov_tok = '<OOV>'
tokenizer_plaintext = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer_plaintext.fit_on_texts(training_sentences)
sequences = tokenizer_plaintext.texts_to_sequences(training_sentences)
BUFFER_SIZE = 10000
BATCH_SIZE = 64
train_data, test_data = imdb_subwords['train'], imdb_subwords['test']
train_dataset = train_data.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.padded_batch(BATCH_SIZE)
test_dataset = test_data.padded_batch(BATCH_SIZE)
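The tokenizer_subwords used in the model below is the pre-trained subword encoder bundled with the TFDS dataset; a sketch of how the lab obtains it when loading imdb_subwords:

import tensorflow_datasets as tfds
imdb_subwords, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True)
tokenizer_subwords = info.features['text'].encoder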
import tensorflow as tf
embedding_dim = 64
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer_subwords.vocab_size, embedding_dim),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.summary()
num_epochs = 10
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(train_dataset,
epochs=num_epochs,
validation_data=test_dataset)
# Visualize the results
# Plot utility
C3W2Lw_Diving_deeper_into_BBC_News.ipynb
---------------------------------------------------------------
def remove_stopwords(sentence):
    stopwords = ["a", "about", "above", "after", "again", ...
    sentence = sentence.lower()
    words = sentence.split()
    no_words = [w for w in words if w not in stopwords]
    sentence = " ".join(no_words)
    return sentence
def parse_data_from_file(filename):
    sentences = []
    labels = []
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)
        for row in reader:
            labels.append(row[0])
            sentence = remove_stopwords(row[1])
            sentences.append(sentence)
    return sentences, labels
def train_val_split(sentences, labels, training_split):
    train_size = int(len(sentences) * training_split)
    train_sentences = sentences[:train_size]
    train_labels = labels[:train_size]
    validation_sentences = sentences[train_size:]
    validation_labels = labels[train_size:]
    return train_sentences, validation_sentences, train_labels, validation_labels
def fit_tokenizer(train_sentences, num_words, oov_token):
    tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)
    tokenizer.fit_on_texts(train_sentences)
    return tokenizer
def seq_and_pad(sentences, tokenizer, padding, maxlen):
    sequences = tokenizer.texts_to_sequences(sentences)
    padded_sequences = pad_sequences(sequences, padding=padding, maxlen=maxlen)
    return padded_sequences
def tokenize_labels(all_labels, split_labels):
    label_tokenizer = Tokenizer(lower=True, oov_token=None)
    label_tokenizer.fit_on_texts(all_labels)
    label_seq = label_tokenizer.texts_to_sequences(split_labels)
    label_seq_np = np.array(label_seq) - 1
    return label_seq_np
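A sketch of how these helpers chain together to produce the variables the training call below expects; the constant values here are assumptions in line with the assignment's defaults:

TRAINING_SPLIT = 0.8
NUM_WORDS = 1000
EMBEDDING_DIM = 16
OOV_TOKEN = '<OOV>'
PADDING = 'post'
MAXLEN = 120

sentences, labels = parse_data_from_file('./bbc-text.csv')
train_sentences, val_sentences, train_labels, val_labels = train_val_split(sentences, labels, TRAINING_SPLIT)
tokenizer = fit_tokenizer(train_sentences, NUM_WORDS, OOV_TOKEN)
train_padded_seq = seq_and_pad(train_sentences, tokenizer, PADDING, MAXLEN)
val_padded_seq = seq_and_pad(val_sentences, tokenizer, PADDING, MAXLEN)
train_labels_seq = tokenize_labels(labels, train_labels)
val_labels_seq = tokenize_labels(labels, val_labels)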
def create_model(num_words, embedding_dim, maxlen):
    tf.random.set_seed(123)
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(num_words, embedding_dim, input_length=maxlen),
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(5, activation='softmax')])  # 5 output units, one per BBC category
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
model = create_model(NUM_WORDS, EMBEDDING_DIM, MAXLEN)
history = model.fit(train_padded_seq,
train_labels_seq,
epochs=30,
validation_data=(val_padded_seq, val_labels_seq))
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# Optional Exercise - Visualizing 3D Vectors
files.download('vecs_c3w2lw.tsv')
files.download('meta_c3w2lw.tsv')
C3W3L1_LSTM_Single_Layer.ipynb
---------------------------------------------------------------
BUFFER_SIZE = 10000
BATCH_SIZE = 256
train_data, test_data = dataset['train'], dataset['test']
train_dataset = train_data.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.padded_batch(BATCH_SIZE)
test_dataset = test_data.padded_batch(BATCH_SIZE)
embedding_dim = 64
lstm_dim = 64
dense_dim = 64
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_dim)),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
num_epochs = 10
history = model.fit(train_dataset,
epochs=num_epochs,
validation_data=test_dataset)
# Visualize the results
C3W3L2_LSTM_Multiple.ipynb
---------------------------------------------------------------
embedding_dim = 64
lstm1_dim = 64
lstm2_dim = 32
dense_dim = 64
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm1_dim, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm2_dim)),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
num_epochs = 10
history = model.fit(train_dataset,
epochs=num_epochs,
validation_data=test_dataset)
# Plot the accuracy and results
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
C3W3L3_CNN_Conv1D.ipynb
---------------------------------------------------------------
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),
tf.keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu'),
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
num_epochs = 10
history = model.fit(train_dataset,
epochs=num_epochs,
validation_data=test_dataset)
# Plot the accuracy and results
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
C3W3L4_GRU_LSTM_Conv1D_imdb_reviews.ipynb
---------------------------------------------------------------
# Building Models for the IMDB Reviews Dataset
# Model 1: Flatten
embedding_dim = 16
dense_dim = 6
model_flatten = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_flatten.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_flatten.summary()
# Model 2: LSTM
lstm_dim = 32  # not set in the notes; 32 is the value the lab uses
model_lstm = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_dim)),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_lstm.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_lstm.summary()
# Model 3: GRU
embedding_dim = 16
gru_dim = 32
dense_dim = 6
model_gru = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(gru_dim)),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_gru.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_gru.summary()
# Model 4: Convolution
embedding_dim = 16
filters = 128
kernel_size = 5
dense_dim = 6
model_conv = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Conv1D(filters, kernel_size, activation='relu'),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_conv.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_conv.summary()
# Model 5: Convolution, From C3W3L3_CNN_Conv1D
embedding_dim = 16
filters = 128
kernel_size = 5
dense_dim = 6
model_conv2 = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Conv1D(filters, kernel_size, activation='relu'),
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_conv2.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_conv2.summary()
C3W3L5_Sarcasm_with_Bidirectional_LSTM.ipynb
---------------------------------------------------------------
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
vocab_size = 10000
max_length = 120
trunc_type = 'post'
padding_type = 'post'
oov_tok = '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type)
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
embedding_dim = 16
lstm_dim = 32
dense_dim = 24
model_lstm = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_dim)),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_lstm.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_lstm.summary()
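The plots below expect a history_lstm; the training step between the summary and the plots presumably looks like this (epoch count is the lab's):

num_epochs = 10
history_lstm = model_lstm.fit(training_padded,
                              training_labels,
                              epochs=num_epochs,
                              validation_data=(testing_padded, testing_labels))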
# Plot the accuracy and loss history
plot_graphs(history_lstm, 'accuracy')
plot_graphs(history_lstm, 'loss')
C3W3L6_sarcasm_1D_convolutional.ipynb
---------------------------------------------------------------
embedding_dim = 16
filters = 128
kernel_size = 5
dense_dim = 6
model_conv = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Conv1D(filters, kernel_size, activation='relu'),
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(dense_dim, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')])
model_conv.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_conv.summary()
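As above, the plots expect a history_conv; presumably produced by a fit call of the same shape:

history_conv = model_conv.fit(training_padded,
                              training_labels,
                              epochs=10,
                              validation_data=(testing_padded, testing_labels))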
# Plot the accuracy and loss history
plot_graphs(history_conv, 'accuracy')
plot_graphs(history_conv, 'loss')
C3W3Lw_Overfitting_NLP_Sentiment140dataset.ipynb
---------------------------------------------------------------
# Using pre-defined Embeddings
GLOVE_FILE = './data/glove.6B.100d.txt'
GLOVE_EMBEDDINGS = {}
with open(GLOVE_FILE) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        GLOVE_EMBEDDINGS[word] = coefs
# Represent the words in your vocabulary using the embeddings
EMBEDDINGS_MATRIX = np.zeros((VOCAB_SIZE+1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = GLOVE_EMBEDDINGS.get(word)
    if embedding_vector is not None:
        EMBEDDINGS_MATRIX[i] = embedding_vector
def create_model(vocab_size, embedding_dim, maxlen, embeddings_matrix):
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size + 1,
                                  embedding_dim,
                                  input_length=maxlen,
                                  weights=[embeddings_matrix],
                                  trainable=False),
        tf.keras.layers.Conv1D(32, 5, activation='relu'),
        tf.keras.layers.GlobalMaxPooling1D(),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
model = create_model(VOCAB_SIZE, EMBEDDING_DIM, MAXLEN, EMBEDDINGS_MATRIX)
history = model.fit(train_pad_trunc_seq,
train_labels,
epochs=20,
validation_data=(val_pad_trunc_seq, val_labels))
# Plot training and validation loss per epoch
# Plot training and validation accuracy per epoch
# Test the slope of your val_loss curve
from scipy.stats import linregress
slope, *_ = linregress(epochs, val_loss)
print(f"The slope of your validation loss curve is {slope:.5f}")
C3W4L1_Generating_Text.ipynb
---------------------------------------------------------------
# Define the lyrics of the song
data = "In the town of Athy one Jeremy Lanigan \n Battered away til …"
corpus = data.lower().split("\n")
print(corpus)
[ 'in the town of athy one jeremy lanigan ', ' battered away til he .....'
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index)+1
print(f"Word index dictionary: {tokenizer.word_index}")
print(f"Total words: {total_words}")
Word index dictionary: {'and': 1, 'the': 2, 'a': 3, ...
Total words: 263
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        n_gram_sequence = token_list[:i+1]
        input_sequences.append(n_gram_sequence)

max_sequence_len = max([len(x) for x in input_sequences])
# Pad all sequences
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# Create inputs and label by splitting the last token in the subphrases
xs, labels = input_sequences[:, :-1], input_sequences[:, -1]
# Convert the label into one-hot arrays
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
# Print token list and phrase
Token list: [ 0 0 0 4 2 66 8 67 68 69]
Decoded to text: ['in the town of athy one jeremy']
# Print label
One-hot label: [0. 0. 0. ..... 1. ...]
Index of label: 70
model = Sequential([
Embedding(total_words, 64, input_length=max_sequence_len-1),
Bidirectional(LSTM(20)),
Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
history = model.fit(xs, ys, epochs=500)
# Visualize the accuracy
plot_graphs(history, 'accuracy')
# Define seed text
seed_text = "Laurence went to Dublin"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    probabilities = model.predict([token_list])
    predicted = np.argmax(probabilities, axis=-1)[0]
    if predicted != 0:
        output_word = tokenizer.index_word[predicted]
        seed_text += " " + output_word
print(seed_text)
Laurence went to Dublin young bacon young bacon from young bacon her myself ...
C3W4L2_Generating_irish_lyrics.ipynb
---------------------------------------------------------------
# Load the dataset
data = open("./irish-lyrics-eof.txt").read()
corpus = data.lower().split("\n")
print(corpus)
['come all ye maidens young and fair', 'and you' .....
# Get sample sentence
sentence = corpus[0].split()
print(f"Sample sentence: {sentence}")
# Print the token list
print(token_list)
Sample sentence: ['come', 'all', 'ye', 'maidens', 'young', 'and', 'fair']
[51, 12, 96, 1217, 48, 2, 69]
# Pick element
elem_number = 5
# Print token list and phrase
print(f"Token list: {xs[elem_number]}")
print(f"Decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}\n")
# Print label
print(f"One-hot label: {ys[elem_number]}")
print(f"Index of label: {np.argmax(ys[elem_number])}")
Token list: [ 0 0 0 0 0 0 0 0 0 51 12 96 1217 48 2]
Decoded to text: ['come all ye maidens young and']
One-hot label: [0. 0. 0. ... 0. 0. 0.]
Index of label: 69
# Hyperparameters
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([
Embedding(total_words, embedding_dim, input_length= max_sequence_len-1),
Bidirectional(LSTM(lstm_units)),
Dense(total_words, activation='softmax')])
model.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy'])
model.summary()
num_epochs = 100  # not set in the notes; the lab trains for 100 epochs
history = model.fit(xs, ys, epochs=num_epochs)
# Visualize the accuracy
plot_graphs(history, 'accuracy')
plot_graphs(history, 'loss')
# Define seed text
seed_text = "help me obi-wan kinobi youre me only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    probabilities = model.predict(token_list, verbose=0)
    predicted = np.argmax(probabilities, axis=-1)[0]
    if predicted != 0:
        output_word = tokenizer.index_word[predicted]
        seed_text += " " + output_word
print(seed_text)
help me obi-wan kinobi youre me only hope and girls and girls and gave ....
C3W4L3_Generating_with_RNN_Shakespeare.ipynb
---------------------------------------------------------------
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
print(f"Length of text: {len(text)} characters.\n")
print(text[:250])
Length of text: 1115394 characters.
First Citizen:
Before we proceed any further, hear me speak.
All:
Speak, speak.
First Citizen:
You are all resolved rather to die than to famish?
All:
Resolved. resolved.
First Citizen:
First, you know Caius Marcius is chief enemy to the people.
class MyModel(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, rnn_units):
        super().__init__(self)
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(rnn_units,
                                       return_sequences=True,
                                       return_state=True)
        self.dense = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, states=None, return_state=False, training=False):
        x = inputs
        x = self.embedding(x, training=training)
        if states is None:
            states = self.gru.get_initial_state(x)
        x, states = self.gru(x, initial_state=states, training=training)
        x = self.dense(x, training=training)
        if return_state:
            return x, states
        else:
            return x
EPOCHS = 20
history = model.fit(dataset,
epochs=EPOCHS,
callbacks=[checkpoint_callback])
class OneStep(tf.keras.Model):
    def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):
        super().__init__()
        self.temperature = temperature
        self.model = model
        self.chars_from_ids = chars_from_ids
        self.ids_from_chars = ids_from_chars
        # Create a mask so '[UNK]' is never sampled
        skip_ids = self.ids_from_chars(['[UNK]'])[:, None]
        sparse_mask = tf.SparseTensor(
            # Put a -inf at each bad index.
            values=[-float('inf')] * len(skip_ids),
            indices=skip_ids,
            dense_shape=[len(ids_from_chars.get_vocabulary())])
        self.prediction_mask = tf.sparse.to_dense(sparse_mask)

    @tf.function
    def generate_one_step(self, inputs, states=None):
        input_chars = tf.strings.unicode_split(inputs, 'UTF-8')
        input_ids = self.ids_from_chars(input_chars).to_tensor()
        predicted_logits, states = self.model(inputs=input_ids,
                                              states=states,
                                              return_state=True)
        predicted_logits = predicted_logits[:, -1, :]
        predicted_logits = predicted_logits / self.temperature
        predicted_logits = predicted_logits + self.prediction_mask
        predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
        predicted_ids = tf.squeeze(predicted_ids, axis=-1)
        predicted_chars = self.chars_from_ids(predicted_ids)
        return predicted_chars, states
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
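The result printed below comes from the sampling loop in the TF text-generation tutorial this notebook follows; a sketch (the 'ROMEO:' seed and 1000-step length are the tutorial's):

import time
start = time.time()
states = None
next_char = tf.constant(['ROMEO:'])
result = [next_char]
for n in range(1000):
    next_char, states = one_step_model.generate_one_step(next_char, states=states)
    result.append(next_char)
result = tf.strings.join(result)
end = time.time()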
print(result[0].numpy().decode('utf-8'), '\n\n' + '_'*80)
print('\nRun time:', end - start)
ROMEO:
Being moved!
Monday must have weals;' trembling women,
Her name by apparel for a certain text may pray.
Give me the rark, and go about that are abroad.
LORD ROSS:
Lay here, my lord, to call my lord, to rate;
Poor heaven, father! be not hours nor 't:
'That should short thou old my land; O, fear thou never wind-sworn
Ares to a marvellous presently at his death'. ......
# Advanced: Customized Training
Epoch 1 Batch 0 Loss 0.6597
Epoch 1 Batch 50 Loss 0.6368
Epoch 1 Batch 100 Loss 0.6287
Epoch 1 Batch 150 Loss 0.6278
Epoch 1 Loss: 0.6346
Time taken for 1 epoch 15.82 sec
....
C3W4Lw_Predicting_next_word_Shakespeare.ipynb
---------------------------------------------------------------
SONNETS_FILE = './sonnets.txt'
with open(SONNETS_FILE) as f:
    data = f.read()
corpus = data.lower().split("\n")
print(f"There are {len(corpus)} lines of sonnets\n")
print(f"The first 5 lines look like this:\n")
for i in range(5):
    print(corpus[i])
There are 2159 lines of sonnets
The first 5 lines look like this:
"from fairest creatures we desire increase,
that thereby beauty's rose might never die,
but as the riper should by time decease,
his tender heir might bear his memory:
but thou, contracted to thine own bright eyes,"
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
tokenizer.texts_to_sequences([corpus[0]])[0]
def features_and_labels(input_sequences, total_words):
    features = input_sequences[:, :-1]
    labels = input_sequences[:, -1]
    one_hot_labels = to_categorical(labels, num_classes=total_words)
    return features, one_hot_labels
# Split the whole corpus
features, labels = features_and_labels(input_sequences, total_words)
print(f"Features have shape: {features.shape}")
print(f"Labels have shape: {labels.shape}")
Features have shape: (15462, 10)
Labels have shape: (15462, 3211)
def create_model(total_words, max_sequence_len):
    model = Sequential()
    model.add(Embedding(total_words, 100, input_length=max_sequence_len-1))
    model.add(Bidirectional(LSTM(128)))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

model = create_model(total_words, max_sequence_len)
history = model.fit(features, labels, epochs=50, verbose=1)
# Take a look at the training curves of your model
plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, loss, 'b', label='Training loss')
seed_text = "Help me Obi Wan Kenobi, you're my only hope"
next_words = 100
for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    predicted = model.predict(token_list, verbose=0)
    predicted = np.argmax(predicted, axis=-1).item()
    output_word = tokenizer.index_word[predicted]
    seed_text += " " + output_word
print(seed_text)
output
"Help me Obi Wan Kenobi, you're my only hope what eyes ...."
C4W1L1_Introduction_Time_Series.ipynb
---------------------------------------------------------------
def plot_series(time, series, format="-", start=0, end=None, label=None):
    plt.figure(figsize=(10, 6))
    plt.plot(time[start:end], series[start:end], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14, labels=label)
    plt.grid(True)
    plt.show()
def trend(time, slope=0):
    series = slope * time
    return series
time = np.arange(365)
slope = 0.1
series = trend(time, slope)
plot_series(time, series, label=[f'slope={slope}'])
def seasonality_pattern(season_time):
    data_pattern = np.where(season_time < 0.4,
                            np.cos(season_time * 2 * np.pi),
                            1 / np.exp(3 * season_time))
    return data_pattern
def seasonality(time, period, amplitude=1, phase=0):
    season_time = ((time + phase) % period) / period
    data_pattern = amplitude * seasonality_pattern(season_time)
    return data_pattern
time = np.arange(4 * 365 + 1)
period = 365
amplitude = 40
series = seasonality(time, period=period, amplitude=amplitude)
plot_series(time, series)
slope = 0.05
period = 365
amplitude = 40
series = trend(time, slope) + seasonality(time, period=period, amplitude=amplitude)
plot_series(time, series)
def noise(time, noise_level=1, seed=None):
    rnd = np.random.RandomState(seed)
    noise = rnd.randn(len(time)) * noise_level
    return noise
noise_level = 5
noise_signal = noise(time, noise_level=noise_level, seed=42)
plot_series(time, noise_signal)
series += noise_signal
plot_series(time, series)
def autocorrelation(time, amplitude, seed=None):
    rnd = np.random.RandomState(seed)
    ar = rnd.randn(len(time) + 50)
    ar[:50] = 100
    phi1 = 0.5
    phi2 = -0.1
    for step in range(50, len(time) + 50):
        ar[step] += phi1 * ar[step - 50]
        ar[step] += phi2 * ar[step - 33]
    ar = ar[50:] * amplitude
    return ar
series = autocorrelation(time, amplitude=10, seed=42)
plot_series(time[:200], series[:200])
# Here is a more straightforward autocorrelation function which just computes a value from the previous time step.
def autocorrelation(time, amplitude, seed=None):
    rnd = np.random.RandomState(seed)
    ar = rnd.randn(len(time) + 1)
    phi = 0.8
    for step in range(1, len(time) + 1):
        ar[step] += phi * ar[step - 1]
    ar = ar[1:] * amplitude
    return ar
series = autocorrelation(time, amplitude=10, seed=42)
plot_series(time[:200], series[:200])
# Another autocorrelated time series you might encounter is one where it decays predictably after random spikes. You will first define the function that generates these spikes below.
def impulses(time, num_impulses, amplitude=1, seed=None):
    rnd = np.random.RandomState(seed)
    impulse_indices = rnd.randint(len(time), size=num_impulses)
    series = np.zeros(len(time))
    for index in impulse_indices:
        series[index] += rnd.rand() * amplitude
    return series
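The impulses_signal fed to autocorrelation_impulses below is presumably generated like this (10 impulses and seed 42 are the lab's values):

impulses_signal = impulses(time, num_impulses=10, seed=42)
plot_series(time, impulses_signal)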
def autocorrelation_impulses(source, phis):
    ar = source.copy()
    for step, value in enumerate(source):
        for lag, phi in phis.items():
            if step - lag > 0:
                ar[step] += phi * ar[step - lag]
    return ar
series = autocorrelation_impulses(impulses_signal, {1: 0.99})
plot_series(time, series)
# Here is another example where the next values are computed from those in t-1 and t-50:
# Autocorrelated data can also ride a trend line and it will look like below.
Non-stationary Time Series
C4W1L2_Statistical_Forecasting.ipynb
---------------------------------------------------------------
# Parameters
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
series += noise(time, noise_level, seed=42)
plot_series(time, series)
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
plot_series(time_train, x_train)
plot_series(time_valid, x_valid)
# Generate the naive forecast
naive_forecast = series[split_time - 1:-1]
plot_series(time_valid, (x_valid, naive_forecast))
print(tf.keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())
61.827534
5.937908
# Moving Average
def moving_average_forecast(series, window_size):
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    forecast = np.array(forecast)
    return forecast
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
plot_series(time_valid, (x_valid, moving_avg))
106.674576
7.142419
# Differencing
diff_series = (series[365:] - series[:-365])
diff_time = time[365:]
diff_moving_avg = moving_average_forecast(diff_series, 30)
diff_moving_avg = diff_moving_avg[split_time - 365 - 30:]
diff_series = diff_series[split_time - 365:]
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg
plot_series(time_valid, (x_valid, diff_moving_avg_plus_past))
53.764587
5.9032416
# Smoothing
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-359], 11) + diff_moving_avg
plot_series(time_valid, (x_valid, diff_moving_avg_plus_smooth_past))
34.315723
4.6053295
C4W1Lw_time_series.ipynb
---------------------------------------------------------------
def trend(time, slope=0):
    return slope * time
def seasonal_pattern(season_time):
    return np.where(season_time < 0.1,
                    np.cos(season_time * 7 * np.pi),
                    1 / np.exp(5 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)

def noise(time, noise_level=1, seed=None):
    rnd = np.random.RandomState(seed)
    return rnd.randn(len(time)) * noise_level
def plot_series(time, series, format="-", title="", label=None, start=0, end=None):
    plt.plot(time[start:end], series[start:end], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.title(title)
    if label:
        plt.legend()
    plt.grid(True)
TIME = np.arange(4 * 365 + 1, dtype="float32")
y_intercept = 10
slope = 0.01
SERIES = trend(TIME, slope) + y_intercept
amplitude = 40
SERIES += seasonality(TIME, period=365, amplitude=amplitude)
noise_level = 2
SERIES += noise(TIME, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(TIME, SERIES)
plt.show()
def compute_metrics(true_series, forecast):
    mse = tf.keras.metrics.mean_squared_error(true_series, forecast).numpy()
    mae = tf.keras.metrics.mean_absolute_error(true_series, forecast).numpy()
    return mse, mae
naive_forecast = SERIES[SPLIT_TIME - 1:-1]
mse: 19.58, mae: 2.60 for naive forecast
def moving_average_forecast(series, window_size):
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    np_forecast = np.array(forecast)
    return np_forecast
moving_avg = moving_average_forecast(SERIES, window_size=30)
moving_avg = moving_avg[1100 - 30:]
mse: 65.79, mae: 4.30 for moving average forecast
diff_series = (SERIES[365:] - SERIES[:-365])
diff_time = TIME[365:]
diff_moving_avg = moving_average_forecast(diff_series, 50)
diff_moving_avg = diff_moving_avg[SPLIT_TIME - 365 - 50:]
past_series = SERIES[SPLIT_TIME - 365:-365]
diff_moving_avg_plus_past = past_series + diff_moving_avg
mse: 8.50, mae: 2.33 for moving average plus past forecast
C4W2L1_Preparing_Time_Series.ipynb
---------------------------------------------------------------
# Flatten the Windows
# Generate a tf dataset with 10 elements (i.e. numbers 0 to 9)
dataset = tf.data.Dataset.range(10)
# Window the data but only take those with the specified size
dataset = dataset.window(5, shift=1, drop_remainder=True)
# Flatten the windows by putting its elements in a single batch
dataset = dataset.flat_map(lambda window: window.batch(5))
# Print the results
for window in dataset:
    print(window.numpy())
[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]
[3 4 5 6 7]
[4 5 6 7 8]
[5 6 7 8 9]
# Group into features and labels
# Generate a tf dataset with 10 elements (i.e. numbers 0 to 9)
dataset = tf.data.Dataset.range(10)
# Window the data but only take those with the specified size
dataset = dataset.window(5, shift=1, drop_remainder=True)
# Flatten the windows by putting its elements in a single batch
dataset = dataset.flat_map(lambda window: window.batch(5))
# Create tuples with features (first four elements of the window) and labels (last element)
dataset = dataset.map(lambda window: (window[:-1], window[-1]))
# Print the results
for x, y in dataset:
print(x.numpy(), y.numpy())
[0 1 2 3] 4
[1 2 3 4] 5
[2 3 4 5] 6
[3 4 5 6] 7
[4 5 6 7] 8
[5 6 7 8] 9
# Shuffle the data
# Shuffle the windows
dataset = dataset.shuffle(buffer_size=10)
# Create batches for training
# Create batches of windows
dataset = dataset.batch(2).prefetch(1)
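The batched output below is printed with the same kind of loop as before; a sketch:

for x, y in dataset:
    print(x.numpy(), y.numpy())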
output
[[1 2 3 4]
[0 1 2 3]] [5 4]
[[5 6 7 8]
[4 5 6 7]] [9 8]
[[3 4 5 6]
[2 3 4 5]] [7 6]
C4W2L2_Single_Layer_NN_Time_Series.ipynb
---------------------------------------------------------------
# Generate the Synthetic Data
# Parameters
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
# Plot the results
plot_series(time, series)
# Define the split time
split_time = 1000
# Get the train set
time_train = time[:split_time]
x_train = series[:split_time]
# Get the validation set
time_valid = time[split_time:]
x_valid = series[split_time:]
# Prepare features and labels
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    # Generate a TF Dataset from the series values
    dataset = tf.data.Dataset.from_tensor_slices(series)
    # Window the data but only take those with the specified size
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    # Flatten the windows by putting its elements in a single batch
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    # Create tuples with features and labels
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    # Shuffle the windows
    dataset = dataset.shuffle(shuffle_buffer)
    # Create batches of windows
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
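window_size, batch_size and shuffle_buffer_size are set earlier in the lab; the values it uses:

window_size = 20
batch_size = 32
shuffle_buffer_size = 1000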
# Generate the dataset windows
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
# Build the single layer neural network
l0 = tf.keras.layers.Dense(1, input_shape=[window_size])
model = tf.keras.models.Sequential([l0])
# Set the training parameters
model.compile(loss="mse",
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))
# Train the model
model.fit(dataset, epochs=100)
# Model Prediction
# Initialize a list
forecast = []
# Use the model to predict data points per window size
for time in range(len(series) - window_size):
    forecast.append(model.predict(series[time:time + window_size][np.newaxis]))
# Slice the points that are aligned with the validation set
forecast = forecast[split_time - window_size:]
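A sketch of the step that turns the list of predictions into an array and scores it against the validation set (the lab drops the extra axes with array indexing):

results = np.array(forecast)[:, 0, 0]
print(tf.keras.metrics.mean_squared_error(x_valid, results).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())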
# Compute the metrics
46.76
5.1
C4W2L3_DNN_with_Time_Series_Data.ipynb
---------------------------------------------------------------
# Generate the Synthetic Data
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
series += noise(time, noise_level, seed=42)
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.shuffle(shuffle_buffer)
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset

dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model_baseline = tf.keras.models.Sequential([
tf.keras.layers.Dense(10, input_shape=[window_size], activation="relu"),
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)])
model_baseline.summary()
model_baseline.compile(loss='mse',
optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))
model_baseline.fit(dataset, epochs=100)
forecast = []
forecast_series = series[split_time - window_size:]
for time in range(len(forecast_series) - window_size):
    forecast.append(model_baseline.predict(forecast_series[time:time + window_size][np.newaxis]))
results = np.array(forecast).squeeze()
# Compute the metrics
print(tf.keras.metrics.mean_squared_error(x_valid, results).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
45.5331
4.9760103
# Tune the learning rate
model_tune = tf.keras.models.Sequential([
tf.keras.layers.Dense(10, input_shape=[window_size], activation='relu'),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1)])
# Set the learning rate scheduler
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model_tune.compile(loss='mse', optimizer=optimizer)
history = model_tune.fit(dataset, epochs=100, callbacks=[lr_schedule])
# Next step is to plot the results of the training.
model_tune = tf.keras.models.Sequential([
tf.keras.layers.Dense(10, activation='relu', input_shape=[window_size]),
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-5, momentum=0.9)
model_tune.compile(loss='mse', optimizer=optimizer)
history = model_tune.fit(dataset, epochs=200)
# Plot the loss
# You can get the predictions again and overlay them on the validation set.
# Compute the metrics
42.18
4.81
C4W2Lw_NN_Predicting_time_series.ipynb
---------------------------------------------------------------
def generate_time_series():
    time = np.arange(4 * 365 + 1, dtype="float32")
    y_intercept = 10
    slope = 0.005
    series = trend(time, slope) + y_intercept
    amplitude = 50
    series += seasonality(time, period=365, amplitude=amplitude)
    noise_level = 3
    series += noise(time, noise_level, seed=51)
    return time, series
# Save all "global" variables within the G class (G stands for global)
@dataclass
class G:
    TIME, SERIES = generate_time_series()
    SPLIT_TIME = 1100
    WINDOW_SIZE = 20
    BATCH_SIZE = 32
    SHUFFLE_BUFFER_SIZE = 1000
def train_val_split(time, series, time_step=G.SPLIT_TIME):
    time_train = time[:time_step]
    series_train = series[:time_step]
    time_valid = time[time_step:]
    series_valid = series[time_step:]
    return time_train, series_train, time_valid, series_valid
def windowed_dataset(series, window_size=G.WINDOW_SIZE, batch_size=G.BATCH_SIZE, shuffle_buffer=G.SHUFFLE_BUFFER_SIZE):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.shuffle(shuffle_buffer)
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
def create_model(window_size=G.WINDOW_SIZE):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(20, input_shape=[window_size], activation="relu"),
        tf.keras.layers.Dense(20, activation='relu'),
        tf.keras.layers.Dense(30, activation='relu'),
        tf.keras.layers.Dense(1)])
    model.compile(loss="mse",
                  optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))
    return model
dataset = windowed_dataset(series_train)
model = create_model()
model.fit(dataset, epochs=100)
def compute_metrics(true_series, forecast):
    mse = tf.keras.metrics.mean_squared_error(true_series, forecast).numpy()
    mae = tf.keras.metrics.mean_absolute_error(true_series, forecast).numpy()
    return mse, mae
def generate_forecast(series=G.SERIES, split_time=G.SPLIT_TIME, window_size=G.WINDOW_SIZE):
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(model.predict(series[time:time + window_size][np.newaxis]))
    forecast = forecast[split_time - window_size:]
    results = np.array(forecast)[:, 0, 0]
    return results
# Save the forecast
dnn_forecast = generate_forecast()
mse: 29.33, mae: 3.33 for forecast
C4W3L1_Simple_RNN.ipynb
---------------------------------------------------------------
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
series += noise(time, noise_level, seed=42)
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.shuffle(shuffle_buffer)
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
# Generate the dataset windows
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model_tune = tf.keras.models.Sequential([
tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[window_size]),
tf.keras.layers.SimpleRNN(40, return_sequences=True),
tf.keras.layers.SimpleRNN(40),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 100)])
model_tune.summary()
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10 ** (epoch / 20))
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model_tune.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer)
history = model_tune.fit(dataset, epochs=100, callbacks=[lr_schedule])
# You can visualize the results and pick an optimal learning rate.
model = tf.keras.models.Sequential([
tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[window_size]),
tf.keras.layers.SimpleRNN(40, return_sequences=True),
tf.keras.layers.SimpleRNN(40),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 100.0)])
learning_rate = 8e-7
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=['mae'])
history = model.fit(dataset, epochs=100)
forecast = []
forecast_series = series[split_time - window_size:]
for time in range(len(forecast_series) - window_size):
    forecast.append(model.predict(forecast_series[time:time + window_size][np.newaxis]))
results = np.array(forecast).squeeze()
plot_series(time_valid, (x_valid, results))
def model_forecast(model, series, window_size, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda w: w.batch(window_size))
    dataset = dataset.batch(batch_size).prefetch(1)
    forecast = model.predict(dataset)
    return forecast
forecast_series = series[split_time - window_size:-1]
forecast = model_forecast(model, forecast_series, window_size, batch_size)
results = forecast.squeeze()
plot_series(time_valid, (x_valid, results))
# Compute the MSE and MAE
print(tf.keras.metrics.mean_squared_error(x_valid, results).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
65.11
6.25
C4W3L2_LSTM_for_forecasting.ipynb
---------------------------------------------------------------
# Generate the dataset windows
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model_tune = tf.keras.models.Sequential([
tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[window_size]),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 100.0)])
model_tune.summary()
# Set the learning rate scheduler
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10 ** (epoch / 20))
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model_tune.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer)
history = model_tune.fit(dataset, epochs=100, callbacks=[lr_schedule])
# Define the learning rate array
lrs = 1e-8 * (10 ** (np.arange(100) / 20))
plt.figure(figsize=(10, 6))
plt.grid(True)
plt.semilogx(lrs, history.history["loss"])
plt.tick_params("both", length=10, width=1, which="both")
plt.axis([1e-8, 1e-3, 0, 30])
# Reset states generated by Keras
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 100.0)])
learning_rate = 1e-5
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(dataset, epochs=100)
def model_forecast(model, series, window_size, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda w: w.batch(window_size))
    dataset = dataset.batch(batch_size).prefetch(1)
    forecast = model.predict(dataset)
    return forecast
# Reduce the original series
forecast_series = series[split_time-window_size:-1]
forecast = model_forecast(model, forecast_series, window_size, batch_size)
# Drop single dimensional axis
results = forecast.squeeze()
plot_series(time_valid, (x_valid, results))
# Compute the MSE and MAE
print(tf.keras.metrics.mean_squared_error(x_valid, results).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
58.02
5.48
C4W3Lw_RNNs_predict_time_series.ipynb
---------------------------------------------------------------
def generate_time_series():
    time = np.arange(4 * 365 + 1, dtype="float32")
    y_intercept = 10
    slope = 0.005
    series = trend(time, slope) + y_intercept
    amplitude = 50
    series += seasonality(time, period=365, amplitude=amplitude)
    noise_level = 3
    series += noise(time, noise_level, seed=51)
    return time, series
@dataclass
class G:
    TIME, SERIES = generate_time_series()
    SPLIT_TIME = 1100
    WINDOW_SIZE = 20
    BATCH_SIZE = 32
    SHUFFLE_BUFFER_SIZE = 1000
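series_train and series_valid are used below but the split cell isn't captured in these notes; a plausible sketch, assuming the usual names:
# Split into train/validation at SPLIT_TIME (names assumed from usage below)
time_train = G.TIME[:G.SPLIT_TIME]
series_train = G.SERIES[:G.SPLIT_TIME]
time_valid = G.TIME[G.SPLIT_TIME:]
series_valid = G.SERIES[G.SPLIT_TIME:]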
def windowed_dataset(series, window_size=G.WINDOW_SIZE, batch_size=G.BATCH_SIZE, shuffle_buffer=G.SHUFFLE_BUFFER_SIZE):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.shuffle(shuffle_buffer)
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset

dataset = windowed_dataset(series_train)
def create_uncompiled_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
                               input_shape=[None]),
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
        tf.keras.layers.Dense(1),
        tf.keras.layers.Lambda(lambda x: x * 100.0)])
    return model
def adjust_learning_rate():
    model = create_uncompiled_model()
    lr_schedule = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-6 * 10**(epoch / 20))
    optimizer = tf.keras.optimizers.SGD(momentum=0.9)
    model.compile(loss=tf.keras.losses.Huber(),
                  optimizer=optimizer,
                  metrics=["mae"])
    history = model.fit(dataset, epochs=100, callbacks=[lr_schedule])
    return history

lr_history = adjust_learning_rate()
plt.semilogx(lr_history.history["lr"], lr_history.history["loss"])
plt.axis([1e-6, 1, 0, 30])
def create_model():
    tf.random.set_seed(51)
    model = create_uncompiled_model()
    learning_rate = 1e-5
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
    model.compile(loss=tf.keras.losses.Huber(),
                  optimizer=optimizer,
                  metrics=["mae"])
    return model
model = create_model()
history = model.fit(dataset, epochs=50)
def compute_metrics(true_series, forecast):
    mse = tf.keras.metrics.mean_squared_error(true_series, forecast).numpy()
    mae = tf.keras.metrics.mean_absolute_error(true_series, forecast).numpy()
    return mse, mae
def model_forecast(model, series, window_size):
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size))
    ds = ds.batch(32).prefetch(1)
    forecast = model.predict(ds)
    return forecast
# Compute the forecast for all the series
rnn_forecast = model_forecast(model, G.SERIES, G.WINDOW_SIZE).squeeze()
# Slice the forecast to get only the predictions for the validation set
rnn_forecast = rnn_forecast[G.SPLIT_TIME - G.WINDOW_SIZE:-1]
mse, mae = compute_metrics(series_valid, rnn_forecast)
mse: 28.74, mae: 3.21 for forecast
C4W4L1_Convolutions_with_LSTMs.ipynb
---------------------------------------------------------------
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
# Plot the results
plot_series(time, series, xlabel="Time", ylabel="Value")
# Define the split time
split_time = 1000
# Get the train set
time_train = time[:split_time]
x_train = series[:split_time]
# Get the validation set
time_valid = time[split_time:]
x_valid = series[split_time:]
# Parameters
window_size = 20
batch_size = 16
shuffle_buffer_size = 1000
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    # Generate a TF Dataset from the series values
    dataset = tf.data.Dataset.from_tensor_slices(series)
    # Window the data but only take those with the specified size
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    # Flatten the windows by putting their elements in a single batch
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    # Create tuples with features and labels
    dataset = dataset.map(lambda window: (window[:-1], window[-1]))
    # Shuffle the windows
    dataset = dataset.shuffle(shuffle_buffer)
    # Create batches of windows
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=64, kernel_size=3,
strides=1, padding="causal",
activation="relu",
input_shape=[window_size, 1]),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.LSTM(64),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)])
model.summary()
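# Note: padding="causal" pads only on the left, so each Conv1D output step
# depends only on the current and earlier time steps - no leakage from the
# future, which is what you want when forecasting.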
init_weights = model.get_weights()
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer)
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# Reset states generated by Keras
tf.keras.backend.clear_session()
# Reset the weights
model.set_weights(init_weights)
learning_rate = 5e-7
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=500)
mae = history.history['mae']
loss = history.history['loss']
epochs = range(len(loss))
plot_series(
x=epochs,
y=(mae, loss),
title="MAE and Loss",
xlabel="Epochs",
legend=["MAE", "Loss"] )
def model_forecast(model, series, window_size, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda w: w.batch(window_size))
    dataset = dataset.batch(batch_size).prefetch(1)
    forecast = model.predict(dataset)
    return forecast
forecast_series = series[split_time-window_size:-1]
forecast = model_forecast(model, forecast_series, window_size, batch_size)
results = forecast.squeeze()
plot_series(time_valid, (x_valid, results))
print(tf.keras.metrics.mean_squared_error(x_valid, results).numpy())
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
45.73067
4.9953823
C4W4L2_Sunspots_DNN_only.ipynb
---------------------------------------------------------------
!head Sunspots.csv
,Date,Monthly Mean Total Sunspot Number
0,1749-01-31,96.7
1,1749-02-28,104.3 …
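Loading the CSV into arrays (a sketch of the usual loading cell, assuming the file sits in the working directory and has the column layout shown above):
import csv
time_step = []
sunspots = []
with open('./Sunspots.csv') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)  # skip the header row
    for row in reader:
        time_step.append(int(row[0]))
        sunspots.append(float(row[2]))
time = np.array(time_step)
series = np.array(sunspots)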
split_time = 3000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
window_size = 30
batch_size = 32
shuffle_buffer_size = 1000
train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(30, input_shape=[window_size], activation='relu'),
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)])
model.summary()
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer)
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
tf.keras.backend.clear_session()
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(30, input_shape=[window_size], activation='relu'),
tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1) ])
learning_rate = 1e-5
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100)
forecast_series = series[split_time - window_size:-1]
forecast = model_forecast(model, forecast_series, window_size, batch_size)
results = forecast.squeeze()
plot_series(time_valid, (x_valid, results))
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
14.480816
C4W4L3_Sunspots_CNN_RNN_DNN.ipynb
---------------------------------------------------------------
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu',
padding="causal",
input_shape=[window_size, 1]),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.LSTM(64),
tf.keras.layers.Dense(30, activation="relu"),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)])
model.summary()
# Train the model
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# Reset states generated by Keras
tf.keras.backend.clear_session()
# Reset the weights
model.set_weights(init_weights)
learning_rate = 3e-7
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100)
mae = history.history['mae']
loss = history.history['loss']
epochs = range(len(loss))
plot_series(
x=epochs,
y=(mae, loss),
title="MAE and Loss",
xlabel="MAE",
ylabel='Loss',
legend=["MAE", "Loss"])
forecast_series = series[split_time - window_size:-1]
forecast = model_forecast(model, forecast_series, window_size, batch_size)
results = forecast.squeeze()
plot_series(time_valid, (x_valid, results))
print(tf.keras.metrics.mean_absolute_error(x_valid, results).numpy())
14.986037
C4W4_Assignment_min_temp_Melbourne.ipynb
---------------------------------------------------------------
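parse_data_from_file is called inside class G below but its definition isn't captured in these notes; a plausible sketch, assuming the CSV has a header row and Date,Temp columns:
import csv

def parse_data_from_file(filename):
    times = []
    temperatures = []
    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip the header row
        for i, row in enumerate(reader):
            times.append(i)  # use the row index as the time step
            temperatures.append(float(row[1]))
    return times, temperatures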
@dataclass
class G:
    TEMPERATURES_CSV = './data/daily-min-temperatures.csv'
    times, temperatures = parse_data_from_file(TEMPERATURES_CSV)
    TIME = np.array(times)
    SERIES = np.array(temperatures)
    SPLIT_TIME = 2500
    WINDOW_SIZE = 64
    BATCH_SIZE = 32
    SHUFFLE_BUFFER_SIZE = 1000
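series_train and series_valid are assumed from the usual split step (the cell isn't captured here); a plausible sketch:
series_train = G.SERIES[:G.SPLIT_TIME]
series_valid = G.SERIES[G.SPLIT_TIME:]
time_valid = G.TIME[G.SPLIT_TIME:]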
def windowed_dataset(series, window_size=G.WINDOW_SIZE, batch_size=G.BATCH_SIZE, shuffle_buffer=G.SHUFFLE_BUFFER_SIZE):
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size + 1))
    ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[-1]))
    ds = ds.batch(batch_size).prefetch(1)
    return ds

train_set = windowed_dataset(series_train, window_size=G.WINDOW_SIZE, batch_size=G.BATCH_SIZE, shuffle_buffer=G.SHUFFLE_BUFFER_SIZE)
def create_uncompiled_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv1D(filters=64,
                               kernel_size=3,
                               strides=1,
                               activation="relu",
                               padding="causal",
                               input_shape=[G.WINDOW_SIZE, 1]),
        tf.keras.layers.LSTM(64, return_sequences=True),
        tf.keras.layers.LSTM(64),
        tf.keras.layers.Dense(30, activation="relu"),
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(5, activation="relu"),
        tf.keras.layers.Dense(1)])
    return model
def adjust_learning_rate(dataset):
    model = create_uncompiled_model()
    lr_schedule = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-4 * 10**(epoch / 20))
    optimizer = tf.keras.optimizers.SGD(momentum=0.9)
    model.compile(loss=tf.keras.losses.Huber(),
                  optimizer=optimizer,
                  metrics=["mae"])
    history = model.fit(dataset, epochs=100, callbacks=[lr_schedule])
    return history

lr_history = adjust_learning_rate(train_set)
def create_model():
    model = create_uncompiled_model()
    learning_rate = 1e-3
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
    model.compile(loss=tf.keras.losses.Huber(),
                  optimizer=optimizer,
                  metrics=["mae"])
    return model

model = create_model()
history = model.fit(train_set, epochs=50)
def compute_metrics(true_series, forecast):
    mse = tf.keras.metrics.mean_squared_error(true_series, forecast).numpy()
    mae = tf.keras.metrics.mean_absolute_error(true_series, forecast).numpy()
    return mse, mae

def model_forecast(model, series, window_size):
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size))
    ds = ds.batch(32).prefetch(1)
    forecast = model.predict(ds)
    return forecast
# Compute the forecast for all the series
rnn_forecast = model_forecast(model, G.SERIES, G.WINDOW_SIZE).squeeze()
# Slice the forecast to get only the predictions for the validation set
rnn_forecast = rnn_forecast[G.SPLIT_TIME - G.WINDOW_SIZE:-1]
mse, mae = compute_metrics(series_valid, rnn_forecast)
print(f"mse: {mse:.2f}, mae: {mae:.2f} for forecast")
mse: 5.66, mae: 1.86
!_!
import tensorflow as tf
import numpy as np
from tensorflow import keras
print(tf.__version__)
# Build a simple Sequential model
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
# Compile the model
model.compile(optimizer='sgd', loss='mean_squared_error')
# Declare model inputs and outputs for training
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
# Train the model
model.fit(xs, ys, epochs=500)
# Make a prediction
print(model.predict([10.0]))
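# The prediction should come out close to 19: the training data follows
# y = 2x - 1, and with 500 epochs the single neuron converges near
# weight 2 and bias -1.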
!_!
# grader-required-cell
import tensorflow as tf
import numpy as np
# grader-required-cell
# GRADED FUNCTION: house_model
def house_model():
    ### START CODE HERE
    # Define input and output tensors with the values for houses with 1 up to 6 bedrooms
    # Hint: Remember to explicitly set the dtype as float
    xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
    ys = np.array([1.0, 1.5, 2.0, (3.5-0.5), (4.0-1), (4.5-1.2)], dtype=float)
    # Define your model (should be a model with 1 dense layer and 1 unit)
    # Note: you can use `tf.keras` instead of `keras`
    model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
    # Compile your model
    # Set the optimizer to Stochastic Gradient Descent
    # and use Mean Squared Error as the loss function
    model.compile(optimizer='sgd', loss='mean_squared_error')
    # Train your model for 1000 epochs by feeding the i/o tensors
    model.fit(xs, ys, shuffle=True, epochs=1000)
    ### END CODE HERE
    return model
# grader-required-cell
# Get your trained model
model = house_model()
# grader-required-cell
new_x = 7.0
prediction = model.predict([new_x])[0]
print(prediction)
[4.0094547]
!_!
import tensorflow as tf
from tensorflow import keras
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)])
!_!
# ------------------------
# LOAD DATASET
# ------------------------
import tensorflow as tf
# print(tf.__version__)
# Load the Fashion MNIST dataset
fmnist = tf.keras.datasets.fashion_mnist
# Load the training and split of the Fashion MNIST dataset
(training_images, training_labels), (test_images, test_labels) = fmnist.load_data()
# ------------------------
# VISUALIZE DATA
# ------------------------
import numpy as np
import matplotlib.pyplot as plt
# You can put between 0 to 59999 here
# index = 0
index = 42
# Set number of characters per row when printing
np.set_printoptions(linewidth=320)
# Print the label and image
print(f'LABEL: {training_labels[index]}')
print(f'\nIMAGE PIXEL ARRAY:\n {training_images[index]}')
# Visualize the image
plt.imshow(training_images[index], cmap='Greys')
!_!
# ------------------------
# CREATE MODEL
# ------------------------
# Normalize the pixel values of the train and test images
training_images = training_images / 255
test_images = test_images / 255
# Build the classification model
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
# ------------------------
# HOW SOFTMAX WORKS
# ------------------------
# Softmax takes a list of values and scales them so that the sum of all
# elements equals 1. Applied to the model's output, you can think of the
# scaled values as the probability of each class. For example, in your
# classification model with 10 units in the output dense layer, the highest
# value being at index = 4 means the model is most confident that the input
# image of clothing is a coat. If it is at index = 5, it is a sandal, and so
# on. See the short code block below demonstrating these concepts. You can
# also watch the lecture if you want to learn more about the Softmax function
# and how the values are computed.
# Declare sample inputs and convert to a tensor
inputs = np.array([[1.0, 3.0, 4.0, 2.0]])
inputs = tf.convert_to_tensor(inputs)
print(f'input to softmax function: {inputs.numpy()}')
# Feed the inputs to a softmax activation function
outputs = tf.keras.activations.softmax(inputs)
print(f'output of softmax function: {outputs.numpy()}')
# Get the sum of all values after the softmax
sum = tf.reduce_sum(outputs)
print(f'sum of outputs: {sum}')
# Get the index with highest value
prediction = np.argmax(outputs)
print(f'class with highest probability: {prediction}')
!_!
model.compile(optimizer = tf.optimizers.Adam(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
# Evaluate the model on unseen data
model.evaluate(test_images, test_labels)
# ++++++++++++++++++++++++++++++++++++
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') >= 0.6:  # Experiment with changing this value
            print("\nReached 60% accuracy so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])
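# Note: logs.get('accuracy') is only available because metrics=['accuracy']
# was passed to model.compile(); without that metric the key is absent.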
!_!
# Next: the same classifier with convolutions added
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])